First, let's set up our environment. These imports just make the numerics easier and adjust some of the plotting defaults to make things more legible.
In [1]:
# Python 3 compatibility
from __future__ import division, print_function
from six.moves import range
# system functions that are always useful to have
import time, sys, os
# basic numeric setup
import numpy as np
from numpy import linalg
# inline plotting
%matplotlib inline
# plotting
import matplotlib
from matplotlib import pyplot as plt
# seed the random number generator
np.random.seed(21)
In [2]:
# re-defining plotting defaults
from matplotlib import rcParams
rcParams.update({'xtick.major.pad': '7.0'})
rcParams.update({'xtick.major.size': '7.5'})
rcParams.update({'xtick.major.width': '1.5'})
rcParams.update({'xtick.minor.pad': '7.0'})
rcParams.update({'xtick.minor.size': '3.5'})
rcParams.update({'xtick.minor.width': '1.0'})
rcParams.update({'ytick.major.pad': '7.0'})
rcParams.update({'ytick.major.size': '7.5'})
rcParams.update({'ytick.major.width': '1.5'})
rcParams.update({'ytick.minor.pad': '7.0'})
rcParams.update({'ytick.minor.size': '3.5'})
rcParams.update({'ytick.minor.width': '1.0'})
rcParams.update({'font.size': 30})
In [3]:
import dynesty
dynesty supports three tiers of sampling techniques: uniform sampling for low-dimensional problems, random walks for low-to-moderate dimensional problems, and slice sampling for high-dimensional problems (with or without the use of gradients). Here we will quickly demonstrate that slice sampling is able to cope with high-dimensional problems using a 25-D correlated multivariate normal distribution.
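As a quick reference, these tiers correspond to the sample keyword accepted by dynesty's samplers (a sketch only; the cells below run the three slice-sampling variants):
In [ ]:
# the `sample` options corresponding to the three tiers
sample_options = {'unif': 'uniform sampling (low-D)',
                  'rwalk': 'random walks (low-to-moderate-D)',
                  'slice': 'multivariate slice sampling (high-D)',
                  'rslice': 'random slice sampling (high-D)',
                  'hslice': 'Hamiltonian slice sampling (high-D, optional gradients)'}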
In [4]:
ndim = 25  # number of dimensions
C = np.identity(ndim)  # set covariance to identity matrix
C[C==0] = 0.4  # set off-diagonal terms (strongly correlated)
Cinv = linalg.inv(C)  # precision matrix
lnorm = -0.5 * (np.log(2 * np.pi) * ndim + np.log(linalg.det(C)))  # ln(normalization)

# 25-D correlated multivariate normal log-likelihood
def loglikelihood(x):
    """Multivariate normal log-likelihood."""
    return -0.5 * np.dot(x, np.dot(Cinv, x)) + lnorm

# prior transform
def prior_transform(u):
    """Transforms our unit cube samples `u` to a flat prior between -5. and 5. in each variable."""
    return 5. * (2. * u - 1.)

# gradient of log-likelihood *with respect to u*
def gradient(x):
    """Multivariate normal log-likelihood gradient."""
    dlnl_dv = -np.dot(Cinv, x)  # standard gradient (w.r.t. x)
    jac = np.diag(np.full_like(x, 10.))  # Jacobian dx/du of the prior transform (x = 10u - 5)
    return np.dot(jac, dlnl_dv)  # chain rule: gradient w.r.t. u

# ln(evidence)
lnz_truth = -ndim * np.log(10. * 0.999999426697)
print(lnz_truth)
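For reference, lnz_truth follows directly from the setup above: the prior is flat over the cube $[-5, 5]^{25}$ with density $10^{-25}$, and the normalized likelihood places essentially all of its mass inside the cube (each unit-variance marginal contains a fraction $\mathrm{erf}(5/\sqrt{2}) \approx 0.999999426697$ of its mass within $[-5, 5]$), so

$$\ln \mathcal{Z} = \ln \int \mathcal{L}(\mathbf{x}) \, \pi(\mathbf{x}) \, d\mathbf{x} \approx -25 \ln 10 \approx -57.56 \, .$$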
Since we know this is a unimodal case, we'll initialize our sampler in the 'single' bounding mode.
In [5]:
# multivariate slice sampling ('slice')
sampler = dynesty.NestedSampler(loglikelihood, prior_transform, ndim,
                                nlive=500, bound='single',
                                sample='slice', slices=5)
sampler.run_nested(dlogz=0.01)
res = sampler.results
In [6]:
# random slice sampling ('rslice')
sampler = dynesty.NestedSampler(loglikelihood, prior_transform, ndim,
                                nlive=500, bound='single',
                                sample='rslice', slices=25)
sampler.run_nested(dlogz=0.01)
res2 = sampler.results
In [7]:
# hamiltonian slice sampling ('hslice')
sampler = dynesty.NestedSampler(loglikelihood, prior_transform, ndim,
                                nlive=500, bound='single',
                                sample='hslice', slices=5,
                                gradient=gradient)
sampler.run_nested(dlogz=0.01)
res3 = sampler.results
Now let's see how we do.
In [8]:
from dynesty import plotting as dyplot
fig, axes = dyplot.runplot(res, color='navy', logplot=True,
                           lnz_truth=lnz_truth, truth_color='black')
fig, axes = dyplot.runplot(res2, color='darkorange', logplot=True, fig=(fig, axes))
fig, axes = dyplot.runplot(res3, color='limegreen', logplot=True, fig=(fig, axes))
fig.tight_layout()
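The summary plots compare the three runs visually; we can also read off the final evidence estimates numerically from the cumulative logz and logzerr arrays stored in each Results object (a quick sketch):
In [ ]:
# final ln(evidence) estimates vs. the analytic truth
for name, r in zip(['slice', 'rslice', 'hslice'], [res, res2, res3]):
    print('{0:6s}: logz = {1:.3f} +/- {2:.3f} (truth: {3:.3f})'
          .format(name, r.logz[-1], r.logzerr[-1], lnz_truth))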
In [9]:
from dynesty import utils as dyfunc
print('Mean:')
print('slice:', dyfunc.mean_and_cov(res.samples,
                                    np.exp(res.logwt - res.logz[-1]))[0])
print('rslice:', dyfunc.mean_and_cov(res2.samples,
                                     np.exp(res2.logwt - res2.logz[-1]))[0])
print('hslice:', dyfunc.mean_and_cov(res3.samples,
                                     np.exp(res3.logwt - res3.logz[-1]))[0])
print('\nVariance:')
print('slice:', np.diagonal(dyfunc.mean_and_cov(res.samples,
                                                np.exp(res.logwt - res.logz[-1]))[1]))
print('rslice:', np.diagonal(dyfunc.mean_and_cov(res2.samples,
                                                 np.exp(res2.logwt - res2.logz[-1]))[1]))
print('hslice:', np.diagonal(dyfunc.mean_and_cov(res3.samples,
                                                 np.exp(res3.logwt - res3.logz[-1]))[1]))